OS-???? [lx] SIGEV_THREAD_ID emulation needed

          --- old/usr/src/uts/common/brand/lx/os/lx_brand.c
          +++ new/usr/src/uts/common/brand/lx/os/lx_brand.c
↓ open down ↓ 982 lines elided ↑ open up ↑
 983  983                  }
 984  984  
 985  985                  if (copyout(&s_pid, (void *)arg2, sizeof (s_pid)) != 0 ||
 986  986                      copyout(&s_tid, (void *)arg3, sizeof (s_tid)) != 0) {
 987  987                          return (EFAULT);
 988  988                  }
 989  989  
 990  990                  return (0);
 991  991          }
 992  992  
      993 +        case B_SIGEV_THREAD_ID: {
      994 +                /*
      995 +                 * Emulate Linux's timer_create(2) SIGEV_THREAD_ID
      996 +                 * notification method. This mechanism is only meant
      997 +                 * for userland threading libraries such as glibc and
      998 +                 * is documented as such. Therefore, assume this is
      999 +                 * only ever invoked for the purpose of alerting a
     1000 +                 * Linux threading library. Assume that the tid is a
     1001 +                 * member of the caller's process and the signal
     1002 +                 * number is valid. See lx_sigev_thread_id() for the
     1003 +                 * userland side of this emulation.
     1004 +                 *
     1005 +                 * The return code from this function is not checked
     1006 +                 * by the caller since it executes in an asynchronous
     1007 +                 * context and there is nothing much to be done. If
     1008 +                 * this function does fail then it will manifest as
     1009 +                 * Linux threads waiting for a signal they will never
     1010 +                 * receive.
     1011 +                 *
     1012 +                 * arg1 -- Linux tid
     1013 +                 * arg2 -- signal number
     1014 +                 * arg3 -- union sigval
     1015 +                 */
     1016 +
      1017 +                int native_sig = lx_ltos_signo((int)arg2, SIGALRM);
     1018 +                pid_t native_pid;
     1019 +                int native_tid;
     1020 +                sigqueue_t *sqp;
     1021 +
     1022 +                lx_lpid_to_spair((pid_t)arg1, &native_pid, &native_tid);
     1023 +                sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
     1024 +                mutex_enter(&curproc->p_lock);
     1025 +
     1026 +                if ((t = idtot(curproc, native_tid)) == NULL) {
     1027 +                        mutex_exit(&curproc->p_lock);
     1028 +                        kmem_free(sqp, sizeof (sigqueue_t));
     1029 +                        return (ESRCH);
     1030 +                }
     1031 +
     1032 +                sqp->sq_info.si_signo = native_sig;
     1033 +                sqp->sq_info.si_code = SI_TIMER;
     1034 +                sqp->sq_info.si_pid = curproc->p_pid;
     1035 +                sqp->sq_info.si_zoneid = getzoneid();
     1036 +                sqp->sq_info.si_uid = crgetruid(CRED());
      1037 +                sqp->sq_info.si_value.sival_ptr = (void *)arg3;
     1038 +                sigaddqa(curproc, t, sqp);
     1039 +
     1040 +                mutex_exit(&curproc->p_lock);
      1041 +
      1042 +                return (0);
      1043 +        }
      1044 +
 993 1045          case B_SET_AFFINITY_MASK:
 994 1046          case B_GET_AFFINITY_MASK:
 995 1047                  /*
 996 1048                   * Retrieve or store the CPU affinity mask for the
 997 1049                   * requested linux pid.
 998 1050                   *
 999 1051                   * arg1 is a linux PID (0 means curthread).
1000 1052                   * arg2 is the size of the given mask.
1001 1053                   * arg3 is the address of the affinity mask.
1002 1054                   */
↓ open down ↓ 923 lines elided ↑ open up ↑
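
For readers unfamiliar with how the userland half reaches this code, here is a minimal sketch, assuming the usual lx emulation-library pattern of invoking the native brand syscall with a B_* subcommand. The wrapper name, the header choices, and the argument marshalling are illustrative assumptions, not the actual lx_sigev_thread_id() implementation referenced in the kernel comment above.

        /*
         * Sketch only -- not the real lx_sigev_thread_id().  It mirrors the
         * argument contract documented in the kernel comment: arg1 is the
         * Linux tid, arg2 the Linux signal number, arg3 the sigval payload.
         */
        #include <sys/types.h>
        #include <sys/syscall.h>        /* syscall(), SYS_brand */
        #include <sys/lx_brand.h>       /* assumed home of B_SIGEV_THREAD_ID */
        #include <signal.h>             /* union sigval */

        static int
        notify_sigev_thread_id(pid_t lx_tid, int lx_sig, union sigval val)
        {
                /*
                 * The kernel queues the signal asynchronously; per the block
                 * comment in the diff, callers do not act on the return
                 * value, but it is passed back here for completeness.
                 */
                return (syscall(SYS_brand, B_SIGEV_THREAD_ID, lx_tid, lx_sig,
                    val.sival_ptr));
        }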
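
The affinity-mask cases at the tail of the hunk are pre-existing context, but their argument layout follows the same pattern. A similarly hedged sketch of a caller, with the wrapper name and the mask representation assumed:

        /*
         * Sketch only: fetch the affinity mask for the calling thread
         * (Linux pid 0), per the argument description in the context above.
         */
        #include <sys/types.h>
        #include <sys/syscall.h>
        #include <sys/lx_brand.h>
        #include <stdint.h>

        static int
        get_affinity_self(uint64_t *mask)
        {
                return (syscall(SYS_brand, B_GET_AFFINITY_MASK, 0,
                    sizeof (*mask), mask));
        }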